In [1]:
import os
import cv2
import glob
import pickle
import numpy as np
import pylab as plt
# import full_pipeline as fp
from IPython.display import Image

# This is deprecated: import Ipython.html.widgets instead:
import ipywidgets

%matplotlib inline
In [2]:
def visualize_before_after(image_before, image_after, cmap=None):
    plt.figure(figsize=(15, 10))
    
    plt.subplot(1,2,1)
    plt.imshow(image_before, cmap=cmap)
    plt.title("Image before")
    
    plt.subplot(1,2,2)
    if cmap:
        plt.imshow(image_after, cmap=cmap)
    else:
        plt.imshow(image_after)
    plt.title("Image after")
In [3]:
test_image = cv2.imread('./images/IMG_0834.JPG')[...,::-1]
In [4]:
_ = plt.imshow(test_image)
In [5]:
sm_kernel = np.ones((1,1),np.float32)/25
blurred = cv2.GaussianBlur(test_image,(5,5),7)
visualize_before_after(test_image, blurred, cmap='gray')
test_image = blurred.copy()
In [6]:
hls = cv2.cvtColor(test_image, cv2.COLOR_RGB2HLS)
h = hls[...,0]
l = hls[...,1]
s = hls[...,2]

plt.figure(figsize=(15, 20))
plt.subplot(1, 3, 1)
plt.imshow(h, cmap='gray')
plt.title("H channel")
_ = plt.axis('off')
plt.subplot(1, 3, 2)
plt.imshow(l, cmap='gray')
plt.title("L channel")
_ = plt.axis('off')
plt.subplot(1, 3, 3)
plt.imshow(s, cmap='gray')
plt.title("S channel")
_ = plt.axis('off')
In [7]:
def color_threshold_slider(lower_th, upper_th):
    """
    """
    thresholded = np.zeros_like(l)
    thresholded[(l > lower_th) & (l < upper_th)] = 255
    plt.imshow(thresholded, cmap='gray')

_ = ipywidgets.interact(color_threshold_slider, lower_th=(0, 255, 1), upper_th=(0, 255, 1))

185 and 2050 looks nice for channel l

In [8]:
plt.figure(figsize=(2,2))
_ = plt.imshow(test_image[1500:1508, 1500:1508])
In [9]:
road_patch = hls[1500:1508, 1500:1508]

h = road_patch[...,0].ravel()
l = road_patch[...,1].ravel()
s = road_patch[...,2].ravel()

plt.figure(figsize=(7, 10))
plt.subplot(3,1,1)
ret_h = plt.hist(h)
_ = plt.title("H values")
plt.subplot(3,1,2)
ret_l = plt.hist(l)
_ = plt.title("L values")
plt.subplot(3,1,3)
ret_s = plt.hist(s)
_ = plt.title("S values")
In [10]:
h_mode = int(ret_h[1][np.argmax(ret_h[0])])
l_mode = int(ret_l[1][np.argmax(ret_l[0])])
s_mode = int(ret_s[1][np.argmax(ret_s[0])])

print("The most frequent value in this patch is: ", (h_mode, l_mode, s_mode))
('The most frequent value in this patch is: ', (108, 228, 0))
In [11]:
lower_th = (92, 211, 0)
upper_th = (112, 231, 58)
# lower_th = (20, 178, 0)
# upper_th = (40, 198, 14)


wl = cv2.inRange(hls, lower_th, upper_th)
visualize_before_after(test_image, wl, cmap='gray')
In [12]:
kernel = np.ones((3,3), np.uint8)
# eroded = cv2.erode(wl, kernel, iterations=2)
dilated = cv2.dilate(wl, kernel, iterations=30)
_ = plt.imshow(dilated, cmap='gray')
In [13]:
im2,contours,hierarchy = cv2.findContours(dilated.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
max_index = np.argmax(areas)
cnt = contours[max_index]
rect = cv2.minAreaRect(cnt)
box1 = cv2.boxPoints(rect)
box1 = np.int0(box1)
output_image_fill = cv2.drawContours(test_image.copy(), [box1],0,(0,200,0),-1)
output_image_bound = cv2.drawContours(test_image.copy(), [box1],0,(0,0,200),20)
In [14]:
black = np.zeros(test_image.shape[:2])
binary_box = cv2.drawContours(black, [box1] ,0,(255,255,255),-1)
binary_box = binary_box.astype(np.uint8)
_ = plt.imshow(binary_box, cmap='gray')
In [15]:
_ = plt.imshow(output_image_bound)
In [16]:
asphalt_result = cv2.addWeighted(test_image, 0.5, output_image_bound, 0.5, 0)
asphalt_result = cv2.addWeighted(asphalt_result, 0.7, output_image_fill, 0.3, 0)
plt.imshow(asphalt_result)
Out[16]:
<matplotlib.image.AxesImage at 0x7fa0aa96aa90>
In [17]:
cv2.imwrite("output_box.jpg", asphalt_result[...,::-1])
Out[17]:
True
In [18]:
plt.figure(figsize=(2,2))
_ = plt.imshow(test_image[1110:1126, 1162:1178])
In [19]:
lane_patch = hls[1396:1404, 1160:1168]

h = lane_patch[...,0].ravel()
l = lane_patch[...,1].ravel()
s = lane_patch[...,2].ravel()

plt.figure(figsize=(7, 10))
plt.subplot(3,1,1)
ret_h = plt.hist(h)
_ = plt.title("H values")
plt.subplot(3,1,2)
ret_l = plt.hist(l)
_ = plt.title("L values")
plt.subplot(3,1,3)
ret_s = plt.hist(s)
_ = plt.title("S values")
In [20]:
h_mode = int(ret_h[1][np.argmax(ret_h[0])])
l_mode = int(ret_l[1][np.argmax(ret_l[0])])
s_mode = int(ret_s[1][np.argmax(ret_s[0])])

print("The most frequent value in this patch is: ", (h_mode, l_mode, s_mode))
('The most frequent value in this patch is: ', (102, 213, 27))
In [21]:
lower_th = (0, 245, 0)
upper_th = (10, 253, 10)

wl = cv2.inRange(hls, lower_th, upper_th)
visualize_before_after(test_image, wl, cmap='gray')
In [22]:
kernel = np.ones((3,3), np.uint8)
dilated = cv2.dilate(wl, kernel, iterations=2)
# dilated = cv2.erode(dilated, kernel, iterations=45)



visualize_before_after(test_image, dilated, cmap='gray')
In [23]:
im2,contours,hierarchy = cv2.findContours(dilated.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
areas = [cv2.contourArea(c) for c in contours]
areas = sorted(areas, key=np.argmax, reverse=True)
output_image_fill = test_image.copy()
output_image_bound = test_image.copy()
for idx,area in enumerate(areas):
    cnt = contours[idx-1]
    rect = cv2.minAreaRect(cnt)
    box = cv2.boxPoints(rect)
    box = np.int0(box)
    output_image_fill = cv2.drawContours(output_image_fill.copy(), [box],0,(0,200,0),-1)
    output_image_bound = cv2.drawContours(output_image_bound.copy(), [box],0,(0,0,200),20)
In [24]:
_ = plt.imshow(output_image_bound)
In [25]:
result = np.bitwise_and(binary_box, dilated)
In [26]:
_ = plt.imshow(result)
In [27]:
def draw_lines(img, lines, color=[255, 0, 0], thickness=2):
    """
    NOTE: this is the function you might want to use as a starting point once you want to 
    average/extrapolate the line segments you detect to map out the full
    extent of the lane (going from the result shown in raw-lines-example.mp4
    to that shown in P1_example.mp4).  
    
    Think about things like separating line segments by their 
    slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left
    line vs. the right line.  Then, you can average the position of each of 
    the lines and extrapolate to the top and bottom of the lane.
    
    This function draws `lines` with `color` and `thickness`.    
    Lines are drawn on the image inplace (mutates the image).
    If you want to make the lines semi-transparent, think about combining
    this function with the weighted_img() function below
    """
    for line in lines:
        for x1,y1,x2,y2 in line:
            cv2.line(img, (x1, y1), (x2, y2), color, thickness)

def hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):
    """
    `img` should be the output of a Canny transform.
        
    Returns an image with hough lines drawn.
    """
    lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len, maxLineGap=max_line_gap)
    a,b = img.shape
    line_img = np.zeros((a,b, 3), dtype=np.uint8)
    draw_lines(line_img, lines)
    return lines, line_img # RIC Modified
In [28]:
rho = 2
theta = np.pi/180 
threshold = 500
min_line_len = 500 
max_line_gap = 25
    

lines, hough2 = hough_lines(result, rho, theta, threshold, min_line_len, max_line_gap)
In [29]:
print(lines[:3])
_ = plt.imshow(hough2)
[[[ 890 1237  921 2999]]

 [[ 866    0  887 1172]]

 [[1827    0 1916 2555]]]
In [30]:
# Drawing lines by approximating the equations.
lines_f = [f.flatten() for f in lines]
# Get X coordinates (Just one is enough)
ys = [l[0] for l in lines_f]
# Max and min values of X
maximum = ys[np.argmax(ys)]
minimum = ys[np.argmin(ys)]
middle_point = (maximum + minimum) / 2
print "max,min = [%s,%s] -- > middle_point = %s" %(maximum,minimum, middle_point)

# Drawing left lane line
left_group = [line for line in lines_f if line[0] < middle_point or line[2] < middle_point]
right_group = [line for line in lines_f if line[0] > middle_point or line[2] > middle_point]
print "Left group length: %s" %len(left_group)
print "Right group length: %s" %len(left_group)

# Getting lines as mx + b
import warnings
warnings.simplefilter('ignore', np.RankWarning)

xx_1 = np.array([(x[0],x[2]) for x in left_group]).flatten()
yy_1 = np.array([(x[1],x[3]) for x in left_group]).flatten()
best_fit_left = np.polyfit(xx_1,yy_1,1)

xx_2 = np.array([(x[0],x[2]) for x in right_group]).flatten()
yy_2 = np.array([(x[1],x[3]) for x in right_group]).flatten()
best_fit_right = np.polyfit(xx_2,yy_2,1)


# Approximating equation
left_mx = best_fit_left[0]
left_b = best_fit_left[1]
print "Left group mean line: Y = %sX + %s" %(left_mx, left_b)

right_mx = best_fit_right[0]
right_b = best_fit_right[1]
print "Left group mean line: Y = %sX + %s" %(right_mx, right_b)

# Obtaining two points for drawing the line.
## Left side
print "Obtaining x1"
print "x1 = (y-b)/m ---> x1 = (%s - %s) / %s" %(0, left_b, left_mx)
lx_1 = int(np.round((0-left_b)/(left_mx)))
print "x1 = %s" %lx_1
print "x2 = (y-b)/m ---> x2 = (%s - %s) / %s" %(hough2.shape[0], left_b, left_mx)
lx_2 = int(np.round((hough2.shape[0]-left_b)/left_mx))
print "x2 = %s" %lx_2
left_points = (lx_1,0,lx_2,hough2.shape[0])
res = None

## Right side
print "Obtaining x1"
print "x1 = (y-b)/m ---> x1 = (%s - %s) / %s" %(0, right_b, right_mx)
rx_1 = int(np.round((0-right_b)/(right_mx)))
print "x1 = %s" %lx_1
print "x2 = (y-b)/m ---> x2 = (%s - %s) / %s" %(hough2.shape[0], right_b, right_mx)
rx_2 = int(np.round((hough2.shape[0]-right_b)/right_mx))
print "x2 = %s" %lx_2
right_points = (rx_1,0,rx_2,hough2.shape[0])
drawed_left = None

# draw the line
drawed_left = cv2.line(hough2.copy(),(left_points[0], left_points[1]), (left_points[2], left_points[3]), (0,255,0), 20)
all_drawed = cv2.line(drawed_left.copy(),(right_points[0], right_points[1]), (right_points[2], right_points[3]), (0,255,0), 20)
_ = plt.imshow(all_drawed)
max,min = [1898,859] -- > middle_point = 1378
Left group length: 31
Right group length: 31
Left group mean line: Y = 47.3049069057X + -40723.6554875
Left group mean line: Y = 28.8579682038X + -52956.0011854
Obtaining x1
x1 = (y-b)/m ---> x1 = (0 - -40723.6554875) / 47.3049069057
x1 = 861
x2 = (y-b)/m ---> x2 = (3000 - -40723.6554875) / 47.3049069057
x2 = 924
Obtaining x1
x1 = (y-b)/m ---> x1 = (0 - -52956.0011854) / 28.8579682038
x1 = 861
x2 = (y-b)/m ---> x2 = (3000 - -52956.0011854) / 28.8579682038
x2 = 924
In [31]:
# In[115]:

def computeShoulderROI(roadROI, margin):
        '''
        This method compute 2 ROIs delimiting the shoulders' region. The idea is to find out if in a
        considered safety region is there any unwanted object, i.e. vegetation
        
        Parameters
        ----------
        roadROI : list of list
            It shows where the road is located in an image.
            It has the following format:
            [[x_bl, y_bl],
             [x_tl, y_tl],
             [x_tr, y_tr],
             [x_br, y_br]]
             
             (bl -> bottom left, tr -> top right)
            
        margin : int
            It's the security margin in pixels from the limit of the lane
            
        Returns
        -------
        left_shoulder_ROI : array_like
        right_shoulder_ROI
        ''' 
        
        left_line = roadROI[:2]
        right_line = roadROI[2:]
        
        # Left shoulder
        left_shoulder = []
        bl = left_line[0]
        tl = left_line[1]
        left_shoulder.append(np.array([bl[0]-margin, bl[1]]))
        left_shoulder.append(np.array([tl[0]-margin, tl[1]]))
        left_shoulder.append(left_line[1])
        left_shoulder.append(left_line[0])
        
        # Right shoulder
        right_shoulder = []
        br = right_line[0]
        tr = right_line[1]
        right_shoulder.append(right_line[1])
        right_shoulder.append(right_line[0])
        right_shoulder.append(np.array([br[0]+margin, br[1]]))
        right_shoulder.append(np.array([tr[0]+margin, tr[1]]))
        
        
        
        return [left_shoulder, right_shoulder]
In [32]:
def drawROI(image, ROI, edgeColor=(0,0,200), foregroundColor=(0,200,0)):
    
    output_image = image.copy()
    hull = cv2.convexHull(np.array(ROI),returnPoints = True)
    output_image_foreground = cv2.drawContours(output_image.copy(), [hull],0,foregroundColor,-1)
    output_image_edge = cv2.drawContours(output_image.copy(), [hull],0,edgeColor,20)
    

    result = cv2.addWeighted(output_image, 0.5, output_image_edge, 0.5, 0)
    result = cv2.addWeighted(result, 0.7, output_image_foreground, 0.3, 0)
    
    return result
In [33]:
# asphalt_result = cv2.addWeighted(test_image, 0.5, output_image_bound, 0.5, 0)
# asphalt_result = cv2.addWeighted(asphalt_result, 0.7, output_image_fill, 0.3, 0)
# c_im = drawROI(test_image.copy(), box1)
# plt.imshow(c_im)
In [34]:
roadROI = [[left_points[2],left_points[3]],
          [left_points[0],left_points[1]],
          [right_points[2],right_points[3]],
          [right_points[0],right_points[1]]]

margin = 250


shoulderROIs = computeShoulderROI(roadROI, margin)

colored_im = test_image.copy()

for shoulder in shoulderROIs: 
    colored_im = drawROI(colored_im, shoulder, foregroundColor=(200,100,0))

_ = plt.figure(figsize=(20,10))
_ = plt.imshow(colored_im)
In [35]:
shoulderROIs[0]
Out[35]:
[array([ 674, 3000]), array([611,   0]), [861, 0], [924, 3000]]
In [36]:
colored_im = drawROI(colored_im.copy(), roadROI)
plt.imshow(colored_im)
Out[36]:
<matplotlib.image.AxesImage at 0x7fa0a80ed810>
In [37]:
left_shoulder = shoulderROIs[0]
xs = [x[0] for x in left_shoulder]
left_side = test_image[:,min(xs):max(xs)]
left_bin = result[:,min(xs):max(xs)]

cv2.imwrite("../Data/left_side.JPEG", left_side)

right_shoulder = shoulderROIs[1]
xs1 = [x[0] for x in right_shoulder]
right_side = test_image[:, min(xs1):max(xs1)]
right_bin = result[:, min(xs1):max(xs1)]
cv2.imwrite("../Data/right_side.JPEG", right_side)
plt.imshow(right_side)
Out[37]:
<matplotlib.image.AxesImage at 0x7fa0a8a87490>
In [38]:
import SimpleITK as sitk
from myshow import myshow, myshow3d

# Download data to work on
%run update_path_to_download_script
from downloaddata import fetch_data as fdata
<matplotlib.figure.Figure at 0x7fa0aa72b450>
In [39]:
img_ori = sitk.GetImageFromArray(test_image.copy())
feature_img_ori = sitk.GradientMagnitude(img_ori)
ws_img_ori = sitk.MorphologicalWatershed(feature_img_ori, level=4, markWatershedLine=True, fullyConnected=False)
myshow(ws_img_ori)
In [40]:
# img_ori = sitk.VectorIndexSelectionCast(sitk.ReadImage(fdata("IMG_0834_personas.JPG")),1)
# feature_img_ori = sitk.GradientMagnitude(img_ori)
# ws_img_ori = sitk.MorphologicalWatershed(feature_img_ori, level=50, markWatershedLine=True, fullyConnected=False)
# myshow(sitk.LabelToRGB(ws_img_ori), "Watershed Over Segmentation")
In [41]:
# img_izq = sitk.VectorIndexSelectionCast(sitk.ReadImage(fdata("left_side.JPEG")),1)
# img_dch = sitk.VectorIndexSelectionCast(sitk.ReadImage(fdata("right_side.JPEG")),1)
img_izq = sitk.GetImageFromArray(left_side)
img_dch = sitk.GetImageFromArray(right_side)
In [42]:
feature_img_izq = sitk.GradientMagnitude(img_izq)
feature_img_dch = sitk.GradientMagnitude(img_dch)
In [43]:
ws_img_izq = sitk.MorphologicalWatershed(feature_img_izq, level=4, markWatershedLine=True, fullyConnected=False)
ws_img_dch = sitk.MorphologicalWatershed(feature_img_dch, level=4, markWatershedLine=True, fullyConnected=False)
as_array_izq = sitk.GetArrayFromImage(ws_img_izq)
as_array_dch = sitk.GetArrayFromImage(ws_img_dch)
In [44]:
plt.figure(figsize=(15,10))
plt.subplot(1,4,1)
plt.imshow(as_array_izq)
plt.subplot(1,4,2)
plt.imshow(left_side)
plt.subplot(1,4,3)
plt.imshow(as_array_dch)
plt.subplot(1,4,4)
_ = plt.imshow(right_side)
In [45]:
formatted_izq = as_array_izq.astype('uint8')
formatted_dch = as_array_dch.astype('uint8')
_, thrs_izq = cv2.threshold(formatted_izq, 5, 255, cv2.THRESH_BINARY)
_, thrs_dch = cv2.threshold(formatted_dch, 5, 255, cv2.THRESH_BINARY)
percentage_izq = float(thrs_izq[thrs_izq >= 100].size) / float(thrs_izq.size)
percentage_dch = float(thrs_dch[thrs_dch >= 100].size) / float(thrs_dch.size)
occ_izq = "Left shoulder occupancy: %.2f%s" %(percentage_izq * 100, '%')
occ_dch = "Right shoulder occupancy: %.2f%s" %(percentage_dch * 100, '%')
In [46]:
# plt.figure(figsize=(15,10))
# _ = plt.hist(as_array_dch)
# _ = plt.xticks(range(0,600,20))
In [47]:
plt.figure(figsize=(15,10))
plt.subplot(1,2,1)
plt.imshow(thrs_izq)
plt.subplot(1,2,2)
_ = plt.imshow(thrs_dch, cmap='gray')
In [48]:
finish_him = cv2.putText(colored_im.copy(),occ_izq, (100,150), cv2.FONT_HERSHEY_COMPLEX, 2.5, (0,0,150), 7)
finish_him = cv2.putText(finish_him.copy(),occ_dch, (100,250), cv2.FONT_HERSHEY_COMPLEX, 2.5, (0,0,150), 7)
In [49]:
resized = cv2.resize(finish_him.copy(), (0,0), fx=0.3, fy=0.3) 
plt.figure(figsize=(20,10))
_ = plt.imshow(resized)
In [ ]: